# This is a BitKeeper generated patch for the following project:
# Project Name: Linux kernel tree
# This patch format is intended for GNU patch command version 2.5 or higher.
# This patch includes the following deltas:
#	           ChangeSet	1.930.112.42+1.930.107.10 -> 1.930.112.43
#	include/asm-ia64/mmu_context.h	1.4.1.1 -> 1.5.1.1
#	arch/ia64/kernel/setup.c	1.11.1.1 -> 1.11.1.2
#	include/asm-ia64/processor.h	1.16.1.1 -> 1.16.1.2
#	arch/ia64/mm/init.c	1.7.2.1 -> 1.7.1.5
#	arch/ia64/kernel/acpi.c	1.6.3.8 -> 1.6.1.10
#	arch/ia64/config.in	1.13.2.3 -> 1.13.1.6
#	arch/ia64/kernel/sys_ia64.c	1.8.1.2 -> 1.9.1.1
#	arch/ia64/hp/common/sba_iommu.c	1.1.2.11 -> 1.1.1.8
#
diff -Nru a/arch/ia64/config.in b/arch/ia64/config.in
--- a/arch/ia64/config.in	Wed Oct  8 09:08:59 2003
+++ b/arch/ia64/config.in	Wed Oct  8 09:08:59 2003
@@ -23,8 +23,8 @@
 define_bool CONFIG_EISA n
 define_bool CONFIG_MCA n
 define_bool CONFIG_SBUS n
-define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y
-define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n
+define_bool CONFIG_RWSEM_GENERIC_SPINLOCK n
+define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM y
 
 choice 'IA-64 processor type' \
 	"Itanium		CONFIG_ITANIUM \
diff -Nru a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
--- a/arch/ia64/hp/common/sba_iommu.c	Wed Oct  8 09:08:59 2003
+++ b/arch/ia64/hp/common/sba_iommu.c	Wed Oct  8 09:08:59 2003
@@ -133,6 +133,7 @@
 
 #define ZX1_IOC_ID	((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
 #define REO_IOC_ID	((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
+#define SX1000_IOC_ID	((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP)
 
 #define ZX1_IOC_OFFSET	0x1000	/* ACPI reports SBA, we want IOC */
 
@@ -1636,6 +1637,7 @@
 static struct ioc_iommu ioc_iommu_info[] __initdata = {
 	{ ZX1_IOC_ID, "zx1", ioc_zx1_init },
 	{ REO_IOC_ID, "REO" },
+	{ SX1000_IOC_ID, "sx1000" },
 };
 
 static struct ioc * __init
diff -Nru a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
--- a/arch/ia64/kernel/acpi.c	Wed Oct  8 09:08:59 2003
+++ b/arch/ia64/kernel/acpi.c	Wed Oct  8 09:08:59 2003
@@ -631,4 +631,21 @@
 	return gsi_to_vector(irq);
 }
 
+int
+acpi_register_irq (u32 gsi, u32 polarity, u32 mode)
+{
+	int vector = 0;
+
+	if (has_8259 && gsi < 16)
+		return isa_irq_to_vector(gsi);
+
+	if (!iosapic_register_intr)
+		return 0;
+
+	/* Turn it on */
+	vector = iosapic_register_intr(gsi, polarity ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
+				       mode ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
+	return vector;
+}
+
 #endif /* CONFIG_ACPI_BOOT */
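The acpi_register_irq() addition above has two paths: legacy GSIs below 16 go straight to the 8259's vector space when a PIC is present, and everything else is handed to the IOSAPIC with the ACPI polarity and trigger-mode bits translated. The compilable userspace sketch below mirrors only that control flow; the constants, stub routines, and vector numbers are invented for illustration and are not the kernel's.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's IOSAPIC constants. */
enum { POL_LOW, POL_HIGH, TRIG_LEVEL, TRIG_EDGE };

static int has_8259 = 1;	/* pretend a legacy PIC was found */

/* Legacy GSIs 0-15 map directly onto PIC vectors (base 0x30 is made up). */
static int isa_irq_to_vector_stub(unsigned int gsi)
{
	return 0x30 + gsi;
}

/* Fake IOSAPIC registration: just report the translated attributes. */
static int iosapic_register_stub(unsigned int gsi, int pol, int trig)
{
	printf("gsi %u: polarity=%s trigger=%s\n", gsi,
	       pol == POL_HIGH ? "high" : "low",
	       trig == TRIG_EDGE ? "edge" : "level");
	return 0x40 + gsi;	/* made-up vector number */
}

static int register_irq(unsigned int gsi, unsigned int polarity, unsigned int mode)
{
	if (has_8259 && gsi < 16)
		return isa_irq_to_vector_stub(gsi);
	return iosapic_register_stub(gsi,
				     polarity ? POL_HIGH : POL_LOW,
				     mode ? TRIG_EDGE : TRIG_LEVEL);
}

int main(void)
{
	printf("vector %#x\n", register_irq(3, 1, 1));	/* legacy ISA IRQ */
	printf("vector %#x\n", register_irq(34, 0, 0));	/* PCI: low, level */
	return 0;
}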
diff -Nru a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
--- a/arch/ia64/kernel/setup.c	Wed Oct  8 09:08:59 2003
+++ b/arch/ia64/kernel/setup.c	Wed Oct  8 09:08:59 2003
@@ -40,6 +40,7 @@
 #include
 #include
 #include
+#include
 
 #ifdef CONFIG_BLK_DEV_RAM
 # include
@@ -58,6 +59,7 @@
 struct cpuinfo_ia64 *boot_cpu_data;
 #else
 struct cpuinfo_ia64 _cpu_data[NR_CPUS] __attribute__ ((section ("__special_page_section")));
+mmu_gather_t mmu_gathers[NR_CPUS];
 #endif
 
 unsigned long ia64_cycles_per_usec;
@@ -567,6 +569,8 @@
 		for (cpu = 1; cpu < NR_CPUS; ++cpu)
 			memcpy(my_cpu_data->cpu_data[cpu]->cpu_data, my_cpu_data->cpu_data,
 			       sizeof(my_cpu_data->cpu_data));
+		my_cpu_data->mmu_gathers = alloc_bootmem_pages_node(BOOT_NODE_DATA(boot_get_local_cnodeid()),
+								    sizeof(mmu_gather_t));
 	} else {
 		order = get_order(sizeof(struct cpuinfo_ia64));
 		my_cpu_data = page_address(alloc_pages_node(numa_node_id(), GFP_KERNEL, order));
@@ -576,9 +580,14 @@
 							    order);
 		for (cpu = 0; cpu < NR_CPUS; ++cpu)
 			boot_cpu_data->cpu_data[cpu]->cpu_data[smp_processor_id()] = my_cpu_data;
+
+		my_cpu_data->mmu_gathers = page_address(boot_alloc_pages_node(boot_get_local_cnodeid(),
+									       GFP_KERNEL,
+									       get_order(sizeof(mmu_gather_t))));
 	}
 #else
 	my_cpu_data = cpu_data(smp_processor_id());
+	my_cpu_data->mmu_gathers = &mmu_gathers[smp_processor_id()];
 #endif
 
 	/*
diff -Nru a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
--- a/arch/ia64/kernel/sys_ia64.c	Wed Oct  8 09:08:59 2003
+++ b/arch/ia64/kernel/sys_ia64.c	Wed Oct  8 09:08:59 2003
@@ -61,16 +61,14 @@
 }
 
 asmlinkage long
-ia64_getpriority (int which, int who, long arg2, long arg3, long arg4, long arg5, long arg6,
-		  long arg7, long stack)
+ia64_getpriority (int which, int who)
 {
-	struct pt_regs *regs = (struct pt_regs *) &stack;
 	extern long sys_getpriority (int, int);
 	long prio;
 
 	prio = sys_getpriority(which, who);
 	if (prio >= 0) {
-		regs->r8 = 0;	/* ensure negative priority is not mistaken as error code */
+		force_successful_syscall_return();
 		prio = 20 - prio;
 	}
 	return prio;
@@ -84,11 +82,9 @@
 }
 
 asmlinkage unsigned long
-ia64_shmat (int shmid, void *shmaddr, int shmflg, long arg3, long arg4, long arg5, long arg6,
-	    long arg7, long stack)
+ia64_shmat (int shmid, void *shmaddr, int shmflg)
 {
 	extern int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr);
-	struct pt_regs *regs = (struct pt_regs *) &stack;
 	unsigned long raddr;
 	int retval;
 
@@ -96,16 +92,14 @@
 	if (retval < 0)
 		return retval;
 
-	regs->r8 = 0;	/* ensure negative addresses are not mistaken as an error code */
+	force_successful_syscall_return();
 	return raddr;
 }
 
 asmlinkage unsigned long
-ia64_brk (unsigned long brk, long arg1, long arg2, long arg3,
-	  long arg4, long arg5, long arg6, long arg7, long stack)
+ia64_brk (unsigned long brk)
 {
 	extern int vm_enough_memory (long pages);
-	struct pt_regs *regs = (struct pt_regs *) &stack;
 	unsigned long rlim, retval, newbrk, oldbrk;
 	struct mm_struct *mm = current->mm;
 
@@ -155,7 +149,7 @@
 out:
 	retval = mm->brk;
 	up_write(&mm->mmap_sem);
-	regs->r8 = 0;	/* ensure large retval isn't mistaken as error code */
+	force_successful_syscall_return();
 	return retval;
 }
 
@@ -232,29 +226,23 @@
  * of) files that are larger than the address space of the CPU.
  */
 asmlinkage unsigned long
-sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff,
-	   long arg6, long arg7, long stack)
+sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff)
 {
-	struct pt_regs *regs = (struct pt_regs *) &stack;
-
 	addr = do_mmap2(addr, len, prot, flags, fd, pgoff);
 	if (!IS_ERR((void *) addr))
-		regs->r8 = 0;	/* ensure large addresses are not mistaken as failures... */
+		force_successful_syscall_return();
 	return addr;
 }
 
 asmlinkage unsigned long
-sys_mmap (unsigned long addr, unsigned long len, int prot, int flags,
-	  int fd, long off, long arg6, long arg7, long stack)
+sys_mmap (unsigned long addr, unsigned long len, int prot, int flags, int fd, long off)
 {
-	struct pt_regs *regs = (struct pt_regs *) &stack;
-
 	if ((off & ~PAGE_MASK) != 0)
 		return -EINVAL;
 
 	addr = do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
 	if (!IS_ERR((void *) addr))
-		regs->r8 = 0;	/* ensure large addresses are not mistaken as failures... */
+		force_successful_syscall_return();
 	return addr;
 }
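All of the sys_ia64.c hunks replace the hand-rolled "regs->r8 = 0" with force_successful_syscall_return(). The underlying issue: ia64 reports syscall failure out of band, so a legitimate return value that merely looks negative (an shmat() attach address, a large brk, a niceness from getpriority) must not reach userspace as -errno. Below is a compilable toy model of that convention; the error_flag variable stands in for the saved-register state and is purely an assumption of this sketch.

#include <stdio.h>

/* Toy model: a separate flag, not the return value, tells userspace
 * whether the call failed.  The real kernel keeps this in the saved
 * registers; "error_flag" here is illustrative only. */
static int error_flag = 1;	/* assume failure until proven otherwise */

static void force_successful_syscall_return_stub(void)
{
	error_flag = 0;		/* the value below is data, not an errno */
}

/* A call like shmat() can legitimately return an address whose signed
 * representation is negative; without the flag it would look like -errno. */
static unsigned long toy_shmat(void)
{
	unsigned long raddr = (unsigned long) -4096;	/* "negative" address */

	force_successful_syscall_return_stub();
	return raddr;
}

int main(void)
{
	unsigned long r = toy_shmat();

	printf("returned %#lx, failed=%d\n", r, error_flag);
	return 0;
}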
diff -Nru a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
--- a/arch/ia64/mm/init.c	Wed Oct  8 09:08:59 2003
+++ b/arch/ia64/mm/init.c	Wed Oct  8 09:08:59 2003
@@ -252,7 +252,7 @@
 	pte_t *pte;
 
 	if (!PageReserved(page))
-		printk("put_gate_page: gate page at 0x%p not in reserved memory\n",
+		printk(KERN_ERR "put_gate_page: gate page at 0x%p not in reserved memory\n",
 		       page_address(page));
 
 	pgd = pgd_offset_k(address);	/* note: this is NOT pgd_offset()! */
@@ -557,7 +557,7 @@
 		efi_memmap_walk(create_mem_map_page_table, 0);
 
 		free_area_init_node(0, NULL, vmem_map, zones_size, 0, zholes_size);
-		printk("Virtual mem_map starts at 0x%p\n", mem_map);
+		printk(KERN_INFO "Virtual mem_map starts at 0x%p\n", mem_map);
 	}
 #endif
 }
diff -Nru a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h
--- a/include/asm-ia64/mmu_context.h	Wed Oct  8 09:08:59 2003
+++ b/include/asm-ia64/mmu_context.h	Wed Oct  8 09:08:59 2003
@@ -2,8 +2,8 @@
 #define _ASM_IA64_MMU_CONTEXT_H
 
 /*
- * Copyright (C) 1998-2001 Hewlett-Packard Co
- * Copyright (C) 1998-2001 David Mosberger-Tang
+ * Copyright (C) 1998-2002 Hewlett-Packard Co
+ *	David Mosberger-Tang
  */
 
 /*
@@ -13,8 +13,6 @@
  * consider the region number when performing a TLB lookup, we need to assign a unique
  * region id to each region in a process.  We use the least significant three bits in a
  * region id for this purpose.
- *
- * Copyright (C) 1998-2001 David Mosberger-Tang
  */
 
 #define IA64_REGION_ID_KERNEL	0 /* the kernel's region id (tlb.c depends on this being 0) */
@@ -44,6 +42,23 @@
 {
 }
 
+/*
+ * When the context counter wraps around all TLBs need to be flushed because an old
+ * context number might have been reused.  This is signalled by the ia64_need_tlb_flush
+ * per-CPU variable, which is checked in the routine below.  Called by activate_mm().
+ *
+ */
+static inline void
+delayed_tlb_flush (void)
+{
+	extern void local_flush_tlb_all (void);
+
+	if (unlikely(local_cpu_data->need_tlb_flush)) {
+		local_flush_tlb_all();
+		local_cpu_data->need_tlb_flush = 0;
+	}
+}
+
 static inline mm_context_t
 get_mmu_context (struct mm_struct *mm)
 {
@@ -131,6 +146,8 @@
 static inline void
 activate_mm (struct mm_struct *prev, struct mm_struct *next)
 {
+	delayed_tlb_flush();
+
 	/*
 	 * We may get interrupts here, but that's OK because interrupt handlers cannot
 	 * touch user-space.
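The point of delayed_tlb_flush() above is to defer the wraparound flush until the next activate_mm() on each CPU, rather than interrupting every CPU the moment the context counter wraps. A compilable toy model of that handshake follows; a single flag here stands in for the per-CPU need_tlb_flush field, and the stubs are not kernel code.

#include <stdio.h>

static int need_tlb_flush;	/* per-CPU in the real code */

static void local_flush_tlb_all_stub(void)
{
	puts("flushing local TLB");
}

/* Mirrors the header's delayed_tlb_flush(): flush once, lazily,
 * the next time this CPU activates an mm after a wraparound. */
static void delayed_tlb_flush(void)
{
	if (need_tlb_flush) {
		local_flush_tlb_all_stub();
		need_tlb_flush = 0;
	}
}

static void activate_mm(void)
{
	delayed_tlb_flush();	/* runs before the new context is used */
	puts("switching to new mm");
}

int main(void)
{
	need_tlb_flush = 1;	/* the context counter wrapped somewhere */
	activate_mm();		/* flushes once... */
	activate_mm();		/* ...then switches cheaply afterwards */
	return 0;
}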
diff -Nru a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
--- a/include/asm-ia64/processor.h	Wed Oct  8 09:08:59 2003
+++ b/include/asm-ia64/processor.h	Wed Oct  8 09:08:59 2003
@@ -168,6 +168,7 @@
 	__u32 ptce_count[2];
 	__u32 ptce_stride[2];
 	struct task_struct *ksoftirqd;	/* kernel softirq daemon for this CPU */
+	void *mmu_gathers;
 # ifdef CONFIG_PERFMON
 	unsigned long pfm_syst_info;
 # endif
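The new cpuinfo_ia64 field is deliberately a void * so processor.h need not pull in the TLB-gather definitions; the setup.c hunks earlier in this patch point it either at the static per-CPU mmu_gathers[] array or at node-local bootmem. A compilable sketch of the non-NUMA wiring, with toy types standing in for mmu_gather_t and cpuinfo_ia64:

#include <stdio.h>

#define NR_CPUS 4

typedef struct { int nr_pages; } mmu_gather_t;	/* toy stand-in */

struct cpuinfo {			/* stands in for cpuinfo_ia64 */
	void *mmu_gathers;		/* mirrors the new field above */
};

static mmu_gather_t mmu_gathers[NR_CPUS];	/* as in the !NUMA setup.c path */
static struct cpuinfo cpu_info[NR_CPUS];

int main(void)
{
	/* Boot-time wiring: each CPU's cpuinfo points at its private gather. */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		cpu_info[cpu].mmu_gathers = &mmu_gathers[cpu];

	/* Users cast the pointer back, so a NUMA kernel can substitute
	 * node-local memory without changing the header at all. */
	mmu_gather_t *tlb = (mmu_gather_t *) cpu_info[0].mmu_gathers;
	tlb->nr_pages = 0;
	printf("cpu0 gather at %p\n", (void *) tlb);
	return 0;
}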